Tutorial Part III a — SDM with MaxEnt
Course 3 SDM with MaxEnt
Preparations
Load libraries
### The packages (repetition)
library(here)
library(sf)
library(tmap)
library(predicts)
library(terra)
library(viridis)
#library(rgl) # not needed?
#library(unmarked) # not needed?
#library(spatstat) # not needed?
## check that the package raster is detached if you work with terra!
## However, it is later needed for the stack
#library(raster) # not needed?
# detach(package:raster)## only if you want to run MaxEnt from R ##
#install.packages('rJava')
library(rJava)
## please check if you get an error message
## If yes:
## 1. check if Java is installed on your computer
## if not: Download open jdk (https://jdk.java.net/23/),
## download the zip file and unpack to folder 'Java' under
## 'Programme' or 'program files'.
## Note: don't use 'program files (x86)' - this may be the cause of errors
## Then, set the system variable on your computer, -> Systemsteuerung, and on the search bar
## search for 'Umgebungsvariable', and then add a JAVA_HOME path to the path
## where Java is located on your Computer.
## Or set the directory of your Java location BEFORE loading the library:
## check with
#Sys.getenv('JAVA_HOME') ## where the R-package is looking for the java.dll file
## set it to the correct path:
#Sys.setenv(JAVA_HOME='C:\\Program Files (x86)\\Java\\jre1.8.0_1447\\bin')
## and recall the library
#library(rJava)
## http://www.r-bloggers.com/how-to-load-the-rjava-package-after-the-error-java_home-cannot-be-determined-from-the-registry/ Set the workspace
## [1] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/R/Course3_a_presence_only_sdm"
## Project paths: root plus the data and output folders.
## All paths are now built with here::here() for consistency -- the two
## maps_wd paths previously used paste(..., sep=''), which yields the
## identical string but mixes two path-building styles in one script.
root_wd <- here::here()
maps_wd <- here::here("data", "data_borneo", "geo_raster_current_asc") # current climate rasters
maps_wd_fut <- here::here("data", "data_borneo", "geo_raster_future_asc") # future climate rasters
vecs_wd <- here::here("data", "data_borneo", "geo_vector") # shapefile
recs_wd <- here::here("data", "data_borneo", "animal_data") # location data
## the output folder should have been created by you during Tutorial 2 'R goes spatial'.
## It should exist
output_wd <- here::here("output")
# It should contain the hillshade.asc
## NOTE(review): setwd() changes global state for the entire R session;
## later chunks rely on this (files are written relative to the wd).
## Prefer passing full paths (e.g. here::here("output", ...)) in new code.
setwd(output_wd) ## set to the OUTPUT folder!
getwd() # check## [1] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output"
Load spatial data and define the CRS
Load rasters
### Data import - Load spatial data (repetition)
## Read three single-layer rasters. terra::rast() reads the CRS from the
## file itself, so no manual CRS assignment is needed (unlike the old
## {raster} package, where ras1@crs had to be set by hand).
ras1 <- terra::rast(paste0(maps_wd, '/bio_asc_01.asc'))
ras24 <- terra::rast(here("data", "data_borneo", "geo_raster_current_asc", "bio_asc_24.asc")) # DEM
ras42 <- terra::rast(here("data", "data_borneo", "geo_raster_current_asc", "bio_asc_42.asc")) # land use
#hillsh <- terra::rast(x = paste(maps_wd,'/borneo_hillshade.asc',sep='')) Stack environmental variables
Loading many rasters can be done in one go.
### Raster stack of environmental predictors (repetition)
# the list.files command is very helpful to check what is in the folders
# use 'pattern' for searching for special file types, here .asc-files.
# NOTE: 'pattern' is a regular expression, so the dot must be escaped
# ('\\.asc$') to match a literal '.asc' extension at the end of the
# file name; a bare '.asc$' would also match e.g. 'xasc'.
files <- list.files(path = maps_wd, pattern = '\\.asc$',
                    full.names = TRUE)
files # these are just names! To load them as spatial objects, use rast()## [1] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_01.asc"
## [2] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_21.asc"
## [3] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_22.asc"
## [4] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_24.asc"
## [5] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_27.asc"
## [6] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_42.asc"
## [7] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/borneo_hillshade.asc"
## note that I only load 4 rasters:
# predictors <- terra::rast(files[c(9, 12, 22, 24)]) # for full data set
## rast() takes the vector of file names directly and stacks the layers
predictors <- terra::rast(files[c(1, 5, 6, 7)]) # for github repository data
predictors## class : SpatRaster
## dimensions : 1425, 1423, 4 (nrow, ncol, nlyr)
## resolution : 0.008333334, 0.008333334 (x, y)
## extent : 108.3341, 120.1925, -4.374013, 7.500988 (xmin, xmax, ymin, ymax)
## coord. ref. : lon/lat WGS 84 (CRS84) (OGC:CRS84)
## sources : bio_asc_01.asc
## bio_asc_27.asc
## bio_asc_42.asc
## borneo_hillshade.asc
## names : bio_asc_01, bio_asc_27, bio_asc_42, borneo_hillshade
Plot rasters
terra::plot(predictors, col = viridis::viridis(100)) ## might take some time depending on your computerLoad shapefiles
### Read in some Shapefiles (repetition)
## package sf (new and maintained)
## Borneo outline polygon
Borneo_shp_sf <- sf::st_read(dsn = vecs_wd,
layer = "borneo_admin",
stringsAsFactors = FALSE)[,c(1:3,5,7,17,18)]## Reading layer `borneo_admin' from data source
## `C:\Users\wenzler\Documents\GitHub\d6_teaching_collection\data\data_borneo\geo_vector'
## using driver `ESRI Shapefile'
## Simple feature collection with 158 features and 18 fields
## Geometry type: MULTIPOLYGON
## Dimension: XY
## Bounding box: xmin: 108.5986 ymin: -4.943054 xmax: 119.2697 ymax: 7.380556
## Geodetic CRS: WGS 84
# Protected areas (PA) polygon
PA_shp_sf <- sf::st_read(dsn = vecs_wd,
layer = "Bor_PA",
stringsAsFactors = FALSE)[, c(1:4)]## Reading layer `Bor_PA' from data source
## `C:\Users\wenzler\Documents\GitHub\d6_teaching_collection\data\data_borneo\geo_vector'
## using driver `ESRI Shapefile'
## Simple feature collection with 255 features and 12 fields
## Geometry type: MULTIPOLYGON
## Dimension: XY
## Bounding box: xmin: 108.5969 ymin: -4.18356 xmax: 119.6176 ymax: 9.911229
## Geodetic CRS: WGS 84
# Rivers lines
River_shp_sf <- sf::st_read(dsn = vecs_wd,
layer = "sn_100000",
stringsAsFactors = FALSE)## Reading layer `sn_100000' from data source
## `C:\Users\wenzler\Documents\GitHub\d6_teaching_collection\data\data_borneo\geo_vector'
## using driver `ESRI Shapefile'
## Simple feature collection with 396 features and 4 fields
## Geometry type: LINESTRING
## Dimension: XY
## Bounding box: xmin: 108.9454 ymin: -3.787917 xmax: 118.7879 ymax: 6.464583
## Geodetic CRS: WGS 84
Load point df and convert to spatial object
These are our observations of the species.
### The spatial point data (species records) (repetition)
# filename
spec_pt_filename <- list.files(path = recs_wd,
pattern = ".csv",
full.names = TRUE)#paste(recs_wd,'/','viverra_tangalunga.csv', sep='')
spec_pt_filename## [1] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/animal_data/DHOsim.csv"
## [2] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/animal_data/MyNewSpecies.csv"
## [3] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/animal_data/Pardofelis_marmorata.csv"
## [4] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/animal_data/PPLsim.csv"
## [5] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/animal_data/RIVERsim.csv"
## [6] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/animal_data/viverra_tangalunga.csv"
# read the species records file
sp_recs <- read.csv(file = spec_pt_filename[1], # decide which species you want to use, here 1st in list
                    header = TRUE, sep = ',')
# convert it to a spatial object (sf here)
sp_recs_sf <- sf::st_as_sf(x = sp_recs,
                           coords = c("long", "lat"), # columns holding the coordinates
                           crs = 4326, # define crs, 4326 is the EPSG code for WGS84
                           sf_column_name = "geometry", # sf needs a geometry column and you have to name it
                           remove = FALSE) # keep the original long/lat columns
                                           # ('FALSE' spelled out instead of 'F', which is reassignable)
Preconditions
Checking for multicollinearity
## workaround for slow computers. First, aggregate the 1 km² resolution into 50*50 km
## (fact = 50 pools blocks of 50 x 50 cells into one coarse cell, taking the mean)
agg_pred <- terra::aggregate(x=predictors,fact=50,FUN=mean)
plot(agg_pred)Pairs plot
## Moving-window (focal, w = 3) Pearson correlation between each pair of
## adjacent layers in the stack -- high absolute values flag collinear
## predictor pairs
diff <- terra::focalPairs(x = agg_pred, w = 3, 'pearson', na.rm = TRUE)
plot(diff)NB: I am a big fan of the pairs()-plot because you actually see the
distribution of the data, but you can do really nice plots with the
{corrplot}-package:
https://cran.r-project.org/web/packages/corrplot/vignettes/corrplot-intro.html
Sampling bias correction
We are now creating a very rudimentary bias correction file. Let’s assume that doing field work in the province of Sabah, Malaysia, is easy and the field sampling has concentrated on these areas. Further, it is easier to sample in lowlands and on flat slopes. Usually, protected areas (PAs) are also well-studied areas, where the sampling intensity is high. And let’s assume that it was not possible to get a research permit for Indonesia and that therefore very few field studies had been conducted here. Please note: this is a completely fictitious example! In practice, you should have real information about the sampling bias.
That is, we create a file that is representing these differences in sampling effort. This is easiest to do with raster manipulation. We first create a raster from our shapefile with the country outlines.
## Simple feature collection with 6 features and 7 fields
## Geometry type: MULTIPOLYGON
## Dimension: XY
## Bounding box: xmin: 115.8098 ymin: 4.27081 xmax: 119.2697 ymax: 7.013332
## Geodetic CRS: WGS 84
## ID_0 ISO NAME_0 NAME_1 NAME_2 Shape_Leng Shape_Area
## 1 158 MYS Malaysia Sabah Lahad Datu 6.7475367 0.62106012
## 2 158 MYS Malaysia Sabah Papar 1.6727672 0.10065591
## 3 158 MYS Malaysia Sabah Penampang 0.9548857 0.03951545
## 4 158 MYS Malaysia Sabah Pensiangan 4.0257859 0.49725547
## 5 158 MYS Malaysia Sabah Pitas 2.9223456 0.11955352
## 6 158 MYS Malaysia Sabah Ranau 2.4815704 0.24027591
## geometry
## 1 MULTIPOLYGON (((118.2299 4....
## 2 MULTIPOLYGON (((116.0144 5....
## 3 MULTIPOLYGON (((116.0477 5....
## 4 MULTIPOLYGON (((116.5357 5....
## 5 MULTIPOLYGON (((117.2762 6....
## 6 MULTIPOLYGON (((116.9182 6....
We create a raster that gets the value of 1 for Malaysia (and Brunei) and a small positive value of 0.1 (MaxEnt cannot deal with 0 -> see lecture) for the rest. We use one of our rasters as mask for cell size resolution and cropping extent:
### *Rasterize* the shapefiles
## rasterize(): cells covered by the selected polygons get the 'field'
## value (= 1 here); all remaining cells get the 'background' value
## (= 0.1 here). 'y = ras24' supplies the target grid (resolution/extent).
malaysia_brunei <- Borneo_shp_sf[Borneo_shp_sf$NAME_0 %in% c("Brunei", "Malaysia"), ]
Mal_ras <- terra::rasterize(x = malaysia_brunei, y = ras24,
                            field = 1, background = 0.1)
plot(Mal_ras)# almost same as not plotting Indonesia:
# NotInd_ras <- rasterize(x=Borneo_shp_sf[Borneo_shp_sf$NAME_0 !=
# "Indonesia",], y=ras24, field=1, background=0.01)We do the same only for the province of Sabah and the Protected Areas
## Same idea for the province of Sabah and for the protected areas:
## polygon cells get 1, everything else 0.1
sabah <- Borneo_shp_sf[Borneo_shp_sf$NAME_1 == 'Sabah', ]
Sab_ras <- terra::rasterize(x = sabah, y = ras24, field = 1, background = 0.1)
PA_ras <- terra::rasterize(x = PA_shp_sf, y = ras24, field = 1, background = 0.1)
### Crosscheck with plot
par(mfrow=c(1,2))
plot(Sab_ras)
plot(PA_ras) #n.b.: only 1 and small values, no NAAnd we do the same for all elevations below 300 m:
Now we add them all up. This means, we rank the sampling intensity, i.e. sampling in protected areas in Sabah below 300 m gets the highest value
## class : SpatRaster
## dimensions : 1425, 1423, 1 (nrow, ncol, nlyr)
## resolution : 0.008333334, 0.008333334 (x, y)
## extent : 108.3341, 120.1925, -4.374013, 7.500988 (xmin, xmax, ymin, ymax)
## coord. ref. : lon/lat WGS 84 (CRS84) (OGC:CRS84)
## source(s) : memory
## varname : bio_asc_24
## name : bio_asc_24
## min value : 0.3
## max value : 4.0
Let’s standardize between 0 and 1 (and then set the zeros to 0.01 again)
# get the maximum value in the layer, standardize it and round to two decimals
# (na.rm = TRUE skips the NODATA cells; spelled out instead of 'T',
# which is reassignable and therefore unsafe)
maxval <- max(values(bias_tmp), na.rm = TRUE)
bias_tmp2 <- bias_tmp / maxval
bias_tmp3 <- round(bias_tmp2, digits = 2)
## same as in one go:
bias1 <- round(bias_tmp / max(values(bias_tmp), na.rm = TRUE), digits = 2)
table(values(bias1)) ##
## 0.08 0.3 0.33 0.53 0.55 0.75 0.78 1
## 89415 107956 423878 39157 146727 5611 49196 4634
# table(bias1@data@values) # is doing the same
### in case of having 0 somewhere:
## FIX: the comparison must happen on the cell values, i.e.
## values(bias1) == 0 (a logical index), NOT values(bias1 == 0): the
## latter returns the 0/1 cell values of the comparison raster, which R
## treats as numeric indices and therefore selects the wrong cells.
bias1[values(bias1) == 0] <- 0.01 # because MaxEnt does not take 0!
table(values(bias1))##
## 0.08 0.3 0.33 0.53 0.55 0.75 0.78 1
## 89415 107956 423878 39157 146727 5611 49196 4634
Now compare the rasters - do they have the same extent, cell size etc?
## [1] TRUE
check for the length of the NODATA values:
## [1] 866574
## [1] 866574
Nice! Now plot it:
Save the bias file
## save into Output folder, which is your working directory:
## check with getwd() if you're not sure of wd
## in case you want it aggregated
# bias_agg <- aggregate(bias1, fact=50,FUN=mean)
# writeRaster(bias_agg, filename="bias_agg.asc", datatype='ascii',
# overwrite=TRUE,NAflag=-9999)
## since I used setwd() to the output folder, it is automatically stored there,
## if not, paste the path to the filename, i.e.
## filename=paste0(output_wd,"/bias_2023.asc") #or # here(output_wd,"bias_2023.asc")
getwd()## [1] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/R/Course3_a_presence_only_sdm"
Now, create folders for the MaxEnt output by hand or do it with the following code. We want a subfolder in our output-folder, called MaxEntRes, with two subfolders called ‘bias’ and ‘nobias’.
## Create the output folder structure for the MaxEnt results.
## FIX: no leading '/' inside file.path() -- file.path() inserts the
## separator itself, so "/MaxEntRes" produced a malformed double slash.
dir.create(path = file.path(output_wd, "MaxEntRes"), showWarnings = FALSE)
dir.create(path = file.path(output_wd, "MaxEntRes", "nobias_current"), showWarnings = FALSE)
dir.create(path = file.path(output_wd, "MaxEntRes", "nobias_future"), showWarnings = FALSE)
dir.create(path = file.path(output_wd, "MaxEntRes", "withbias_current"), showWarnings = FALSE)
dir.create(path = file.path(output_wd, "MaxEntRes", "withbias_future"), showWarnings = FALSE)
Before we work with MaxEnt, please inspect the header of your bias
file by opening it in a normal text editor. Mine looks like:
NCOLS 1423
NROWS 1425
XLLCORNER 108.334122887
YLLCORNER -4.374012999
CELLSIZE 0.0083333337997189
NODATA_value -9999
If this throws an error in MaxEnt, please change the
CELLSIZE
from 0.0083333337997189 to 0.0083333338 by hand in any
text editor! That is, the header should read:
ncols 1423
nrows 1425
xllcorner 108.33412288693
yllcorner -4.3740129986359
cellsize 0.0083333338
NODATA_value
-9999
(Please, without ‘
’ if you use copy-paste from my .rmd course
script…..)
Running MaxEnt from the GUI
+++++++++++++++++++++++++++++++++++++++++++++++++
Exercise:
Please run MaxEnt manually first by opening the maxent.jar (double click
-> then the grey GUI should open after some seconds) - see lecture.
This helps you to understand the principles first before running the
code in R.
+++++++++++++++++++++++++++++++++++++++++++++++++
Running MaxEnt in R
select predictors
Here you can select which predictors you want to use. Please remember
from the above exercise that current and future maps need to carry the
same name, but in different folders, and that you must have the future
maps for those variables that you included to fit the model to current
conditions! That means, if you have fitted the model to current
temperatures, you need to have maps of future temperatures as well (with
the same name(s)) if you want to project the model to future climates.
The following example only builds on those files that are available
in the repository, that is, the example runs on files bio_asc_01,
bio_asc_21, bio_asc_22, bio_asc_24, bio_asc_27 and bio_asc_42. If you
use the full dataset provided by the external link, please make sure
that your predictor variables correspond.
## [1] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_01.asc"
## [2] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_21.asc"
## [3] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_22.asc"
## [4] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_24.asc"
## [5] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_27.asc"
## [6] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/bio_asc_42.asc"
## [7] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/data/data_borneo/geo_raster_current_asc/borneo_hillshade.asc"
Maxent without sampling bias correction
Here, the whole area that is given by the raster is sampled equally (background). Predictors are used as stack in the following MaxEnt command. Also, don’t forget to mark categorical variables and to not select variables which do not contain biological information, like the hillshade, as we did above.
MaxEnt without bias current
## Fit MaxEnt to current conditions WITHOUT bias correction:
## background points are drawn uniformly from the predictor extent.
xm_nobias_current <- predicts::MaxEnt(
  x = predictors_selected,
  p = sp_recs[, 2:3], # x and y coordinates of the presence records
  factors = 'bio_asc_42', # raster names which are factors
  nbg = nrow(sp_recs) * 10, # number of background points, here ten times the presences
  path = file.path(output_wd, "MaxEntRes", "nobias_current"), # output path (no leading '/' inside file.path)
  args = c("replicates=3", # number of replicates
           "outputformat=logistic", # output format
           "responsecurves") # also write response curves
)
saveRDS(xm_nobias_current, file.path(output_wd, "/MaxEntRes", "nobias_current", "maxent_nobias_current.rds"))Advanced: For the whole list of arguments that can be set in MaxEnt
see:
https://github.com/shandongfx/workshop_maxent_R/blob/master/code/Appendix1_case_study.md
predict without bias current
## Average the replicate predictions into one layer and save it as ASCII
## grid (FIX: no leading '/' inside file.path)
pred_no_bias_current <- terra::mean(predicts::predict(xm_nobias_current, predictors_selected))
terra::writeRaster(pred_no_bias_current,
                   file.path(output_wd, "MaxEntRes", "nobias_current", "species_nobias_cur_avg.asc"),
                   overwrite = TRUE)
## a little sneak preview:
terra::plot(pred_no_bias_current, col=viridis(100))MaxEnt without bias future
## Project to FUTURE conditions (without bias correction): the model is
## re-fitted to the current predictors and projected onto the rasters in
## 'maps_wd_fut' via MaxEnt's 'projectionlayers' argument.
xm_nobias_future <- predicts::MaxEnt(
  x = predictors_selected, # need to rerun (fit) MaxEnt on current conditions
  p = sp_recs[, 2:3], # x and y coordinates
  factors = 'bio_asc_42', # raster names which are factors
  nbg = nrow(sp_recs) * 10, # number of background points, ten times
  path = file.path(output_wd, "MaxEntRes", "nobias_future"), # output path (no leading '/' inside file.path)
  args = c("replicates=3",
           "outputformat=logistic",
           "responsecurves",
           paste0("projectionlayers=", maps_wd_fut)) # project onto future maps
)
saveRDS(xm_nobias_future, file.path(output_wd, "/MaxEntRes", "nobias_future", "maxent_nobias_future.rds"))## sneak preview - ascii file should be in folder:
## check for the average '_avg' file out of all single repetitions
## (FIX: escape the dot -- 'pattern' is a regular expression)
fut_avg_nobias <- list.files(here::here("output", "MaxEntRes", "nobias_future"),
                             pattern = '_avg\\.asc$',
                             full.names = TRUE)
fut_avg_nobias## [1] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/MaxEntRes/nobias_future/species_geo_raster_future_asc_avg.asc"
## NB: the future plot might not look very different from the current one,
## because not many changing predictor variables were used.
## Create a residual plot subtracting current from future.
## NOTE(review): 'sneak' is created in a chunk not shown here --
## presumably terra::rast(fut_avg_nobias); confirm before running. The
## subtraction requires both objects to be terra::SpatRaster.
res_plot <- sneak - pred_no_bias_current
plot(res_plot, col= inferno(100))MaxEnt with sampling bias correction
Create background points
The prob = TRUE argument samples points according to the probability values of the map. I suggest actually doing this INSIDE the MaxEnt command to have varying random points in space for each run, but doing it once outside the MaxEnt command line makes the code faster.
MaxEnt with bias current
## Fit MaxEnt WITH bias correction: instead of uniform background
## sampling, 'a' supplies background points drawn according to the bias
## file (created above).
xm_withbias_current <- predicts::MaxEnt(
  x = predictors_selected,
  p = sp_recs[, 2:3], # x and y coordinates
  factors = 'bio_asc_42', # raster names which are factors
  a = random_pts, # bias-corrected background points
  path = file.path(output_wd, "MaxEntRes", "withbias_current"), # output path (no leading '/' inside file.path)
  args = c("replicates=3",
           "outputformat=logistic",
           "responsecurves") # number of replicates and outputformat
)
saveRDS(xm_withbias_current, file.path(output_wd, "/MaxEntRes", "withbias_current", "maxent_withbias_current.rds"))predict with bias current
## Average the replicate predictions (with bias correction) and save
## (FIX: no leading '/' inside file.path)
pred_with_bias_current <- terra::mean(predicts::predict(xm_withbias_current, predictors_selected))
terra::writeRaster(pred_with_bias_current,
                   file.path(output_wd, "MaxEntRes", "withbias_current", "species_bias_cur_avg.asc"),
                   overwrite = TRUE)
## a little sneak preview:
terra::plot(pred_with_bias_current, col=viridis(100))MaxEnt with bias future
## Fit WITH bias correction and project to FUTURE conditions
## ('projectionlayers' points to the folder of future rasters).
xm_withbias_future <- predicts::MaxEnt(
  x = predictors_selected,
  p = sp_recs[, 2:3], # x and y coordinates
  factors = 'bio_asc_42', # raster names which are factors
  a = random_pts, # bias-corrected background points
  path = file.path(output_wd, "MaxEntRes", "withbias_future"), # output path (no leading '/' inside file.path)
  args = c("replicates=3",
           "outputformat=logistic",
           "responsecurves",
           paste0("projectionlayers=", maps_wd_fut)) # project onto future maps
)
saveRDS(xm_withbias_future, file.path(output_wd, "/MaxEntRes", "withbias_future", "maxent_withbias_future.rds"))CONTINUE HERE after results generated
Since we now have results produced irrespective of whether we used R or the GUI for MaxEnt, we can have a look at the results produced. Please listen to the lecture to understand the type of results we are exploring in the following.
Accessing MaxEnt results
Have a look at the MaxEnt layers and results produced in the folder MaxEntRes. MaxEnt saves the average of x repetitions (remember, we ran 3 repetitions) in a file ending in ‘_avg.asc’. Please make sure that you have results in each of the four folders under MaxEntRes, and that the folders are structured in the way we set them up above.
# 'recursive = TRUE' means that subfolders are searched as well!
# (FIX: file.path() instead of paste(..., sep=''); escaped dot because
# 'pattern' is a regular expression)
infiles <- list.files(path = file.path(output_wd, 'MaxEntRes'),
                      pattern = '_avg\\.asc$',
                      full.names = TRUE, recursive = TRUE)
infiles## [1] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/MaxEntRes/nobias_current/species_nobias_cur_avg.asc"
## [2] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/MaxEntRes/nobias_future/species_geo_raster_future_asc_avg.asc"
## [3] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/MaxEntRes/withbias_current/species_bias_cur_avg.asc"
## [4] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/MaxEntRes/withbias_future/species_geo_raster_future_asc_avg.asc"
# example
#[1] "c:/Users/kramer/Dropbox/.../MaxEntRes
# /nobias/river_dummy_avg.asc"
#[2] "c:/Users/kramer/Dropbox/.../MaxEntRes
# /nobias/river_dummy_FutureMaps_avg.asc"
#[3] "c:/Users/kramer/Dropbox/.../MaxEntRes
# /withbias/river_dummy_avg.asc"
#[4] "c:/Users/kramer/Dropbox/.../MaxEntRes
# /withbias/river_dummy_FutureMaps_avg.asc"Stack results file and make usual plot
## IF you have a slow computer,
## please use the workaround with function aggregate() from above:
# me_stack <- aggregate(c(infiles[c(1:4)]), fact = 50, FUN = mean)
## rast() accepts a vector of file names directly and stacks them as
## layers of one SpatRaster
me_stack <- rast(infiles[1:4])
# name sequence according to infiles list above
names(me_stack) <- c('curr_noBias', 'fut_noBias',
                     'curr_withBias', 'fut_withBias')
#par(mar=c(4,4,3,3), oma=c(2,2,2,2))
plot(me_stack,col=viridis(100)) # note: I might use a different example than you did in MaxEntCheck the range of the suitability values
How would you interpret the results?
Threshold selection
You should have results for the runs with a bias file and without in a .csv file. When you run MaxEnt via the GUI, you receive only two results files, because the model is fitted to the data of the current condition; in R you will receive four files, simply because running MaxEnt with R for future maps is a re-fit of the model to current conditions. The files should be identical, so it is enough to only save the current results file.
# Use maxentResults.csv to access the threshold value
# column: 10th percentile training presence logistic threshold
# (FIX: file.path() instead of paste(..., sep=''); escaped dot and '$'
# anchor because 'pattern' is a regular expression)
me_res <- list.files(path = file.path(output_wd, 'MaxEntRes'),
                     pattern = 'maxentResults\\.csv$',
                     full.names = TRUE, recursive = TRUE)
me_res## [1] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/MaxEntRes/nobias_current/maxentResults.csv"
## [2] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/MaxEntRes/nobias_future/maxentResults.csv"
## [3] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/MaxEntRes/withbias_current/maxentResults.csv"
## [4] "C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/MaxEntRes/withbias_future/maxentResults.csv"
# example
#[1] "c:/Users/kramer/Dropbox/.../MaxEntRes
# /nobias/maxentResults.csv"
#[2] "c:/Users/kramer/Dropbox/.../MaxEntRes
# /withbias/maxentResults.csv"Now we do some elegant trick: we store both files as two objects in a list! Similar to stacking rasters, a bit….
## Please choose here -- run ONLY ONE of the two lines below
## (running both is harmless but the second assignment overwrites the first):
store_res <- lapply(me_res,FUN=read.csv) ## store as list obj. when run from GUI
store_res <- lapply(me_res[c(1,3)],FUN=read.csv) ## when run from R
## Check the following out:
# head(store_res) # list object with 2 slots
# str(store_res)
# store_res[[1]]We are searching for the column with the threshold.
## Extract the values at column '10th percentile' at position nrep +1
## (= nrow command) to access avg value of all repetitions at the lowest row
zename<-'X10.percentile.training.presence.Logistic.threshold'
## Note! :
## In MaxEnt 3.4.1 the column name is:
## 'X10.percentile.training.presence.Logistic.threshold'
## with capital L in 'logistic', in older versions with 'l'.
zename## [1] "X10.percentile.training.presence.Logistic.threshold"
Now check which column number that is….
## [1] 51
… and extract the threshold value :
## a little head twister:
## access each element of your list (each is a whole data set) with
## store_res[[1]]; inside this list element (a data.frame), take the
## value at the last row (nrow) and at column 'zecol', which holds the
## threshold. The results are stored per repetition, and the LAST ROW
## contains the average over all repetitions.
## NOTE(review): 'zecol' is computed in a chunk not shown here --
## presumably which(colnames(store_res[[1]]) == zename); confirm.
t_noBias <- store_res[[1]][nrow(store_res[[1]]),zecol]
t_withBias <- store_res[[2]][nrow(store_res[[2]]),zecol]
t_noBias #the thresholds## [1] 0.3625
## [1] 0.387
Now apply the thresholds to your MaxEnt output maps of relative probability. For that, store the thresholds in a vector, and double them for bias/ nobias, because the threshold stays the same for current and future maps (see above):
## [1] 0.3625 0.3625 0.3870 0.3870
Now set the thresholds to the stack. If you don’t remember the stack (me_stack), plot it again. Test different thresholds and see how the predictions change:
Analyses
Now let’s check how global change affects the protected areas. Does the suitability of these areas increase or decrease in the future? Extract relative probability values (habitat suitability) inside protected areas PAs and make a boxplot for comparison.
## check if overlay correct - only works when you have NOT aggregated the maps
terra::compareGeom(PA_ras,me_stack[[1]]) ## [1] TRUE
## extract: 'ex' holds the cell numbers of all PA cells
ex <- which(values(PA_ras)==1) ##remember PA_ras == 1 where the PAs are
## ex # gives the index (cell numbers) of those elements
## extract the suitability values of all four layers at those cells
## NOTE(review): relies on terra::extract() accepting cell numbers as
## 'y' -- confirm against the installed terra version.
ex_stack <- terra::extract(x=me_stack, y=ex)
head(ex_stack)## curr_noBias fut_noBias curr_withBias fut_withBias
## 1 NA NA NA NA
## 2 0.0011197921 0.001269990 2.361548e-04 3.07386e-05
## 3 0.0004946960 0.000529586 1.733264e-04 4.35029e-05
## 4 0.0006508699 0.000678241 1.230861e-04 5.65338e-05
## 5 0.0003685336 0.000376808 1.551008e-04 3.38545e-05
## 6 0.0001942456 0.000186320 5.513565e-05 2.72841e-05
Plot the difference of habitat suitability in scenarios and PAs
Calculate no. of cells (=area) of suitable habitat in PAs, that we defined via the binary threshold maps.
## curr_noBias fut_noBias curr_withBias fut_withBias
## 1 NA NA NA NA
## 2 FALSE FALSE FALSE FALSE
## 3 FALSE FALSE FALSE FALSE
## 4 FALSE FALSE FALSE FALSE
## 5 FALSE FALSE FALSE FALSE
## 6 FALSE FALSE FALSE FALSE
## curr_noBias fut_noBias curr_withBias fut_withBias
## 46892 53211 47519 33686
Interestingly: when correcting for sampling bias, the future
scenarios have the lowest number of suitable habitat inside PAs (in my
species):
curr_noBias fut_noBias curr_withBias fut_withBias
43285 63905 43430 39989
Least cost path for connectivity - spaths
#install.packages('spaths')
library(spaths)
#?spaths
## Set start and end points: centroids of the first two PA polygons.
## NOTE: st_centroid() on unprojected lon/lat data warns that the result
## is only approximate -- fine for this illustration.
start <- st_centroid(PA_shp_sf[1,1])
end <- st_centroid(PA_shp_sf[2,1])
## Several commands in one line when separated by ';':
plot(me_stack[[1]]); points(start, col = "red"); points(end, col = "orange")## Clip raster to save computation time
## Crop window given in the order xmin, xmax, ymin, ymax (degrees)
cr_extent <- c(114,117,3,6.5)
me_cr <- terra::crop(x=me_stack[[1]],y=cr_extent)
plot(me_cr); points(start,pch=15, col = "red"); points(end,pch=15, col = "orange")### Necessary calculations
## check out help first
#?spaths::shortest_paths
# calculate the least-cost path between 'start' and 'end' on the
# suitability raster 'me_cr'
sPath_v <- spaths::shortest_paths(rst = me_cr,
origins = start,
destinations = end,
output = "both", # get lines and distances
contiguity = "queen", # check the difference by using "queen" or "rook"
tr_fun = function(d, v1, v2) d / (v1 + v2)) # step cost = distance / summed cell suitabilities,
                                            # so high suitability = cheap to traverse
# transform to sf
sPath_sf <- st_as_sf(sPath_v)
# length
st_length(sPath_sf)## 335348.7 [m]
## Make a plot
plot(me_cr,col=viridis(100))
points(start,pch=17,col='white'); points(end,pch=15, col='white')
plot(PA_shp_sf[,1], border='white', col='transparent',lwd=0.5,add=T)
lines(sPath_sf[,1],lwd=1, col= 'red')old not run
Least cost path for connectivity
#install.packages('gdistance')
library(gdistance)
#?gdistance
# we need a workaround, because we need the PAs now as SpatialPolygonDataFrame,
# and not as sf-Object (gdistance works with old format)
PA_shp_sp <- as(PA_shp_sf, "Spatial")
## Set start and end points: centers of the first two PA polygons
start <- st_centroid(PA_shp_sf[1,1])
end <- st_centroid(PA_shp_sf[2,1])
## Several commands in one line when separated by ';':
plot(me_stack[[1]]); points(start, col = "red"); points(end, col = "black")
## Clip raster to save computation time
cr_extent <- c(114,117,3,6.5)
me_cr <- crop(x=me_stack[[1]],y=cr_extent)
plot(me_cr); points(start,pch=15, col = "red"); points(end,pch=15, col = "black")
### Necessary calculations
## Calculate the transition layer
## but first, we need to backtransform the SpatRaster {terra}-Object
## into a RasterLayer {raster}-Object:
me_cr_raster <- raster::raster(me_cr)
trans <- gdistance::transition(x = me_cr_raster,
transitionFunction = mean,
directions = 4,
symm = F)
class(trans)
## Calculate the shortest weighted connection
sPath_sp <- gdistance::shortestPath(trans, as(start, "Spatial"), as(end, "Spatial"),
output="SpatialLines")
terra::crs(sPath_sp) <- "+proj=longlat +datum=WGS84 +no_defs"
## not needed any more
## Calculate the length of the path
gdistance::costDistance(trans, as(start, "Spatial"), as(end, "Spatial")) #units?
## Make a plot
plot(me_cr,col=viridis(100))
points(start,pch=17,col='white'); points(end,pch=15, col='white')
plot(PA_shp_sf, border='white', col='transparent',lwd=0.5,add=T)
lines(sPath_sp,lwd=1, col= 'red')run from here again
## Where does the animal corridor intersect rivers? ({sf} overlay)
River_inter1 <- sf::st_intersection(x = River_shp_sf, y = sPath_sf)
## Plot the path geometry only (st_geometry avoids one panel per
## attribute), then overlay PAs, rivers and the intersection points
plot(sf::st_geometry(sPath_sf), lwd = 3)
plot(PA_shp_sf[, 1], border = 'grey', lwd = 1, add = TRUE)
plot(River_shp_sf[, 1], col = 'blue', lwd = 2, add = TRUE)
plot(River_inter1[, 1], col = 'red', lwd = 4, add = TRUE)
box()## Which connections intersect the PAs?
## as PA_shp_sf has an invalid topology, we need the workaround with the old
## {rgeos}package; https://github.com/r-spatial/sf/issues/1902
PA_inter1 <- sf::st_intersection(sf::st_make_valid(PA_shp_sf), sPath_sf) # old way
plot(sf::st_geometry(sPath_sf),lwd=3) # we need st_geometry to have a good plot
plot(PA_shp_sf,border='blue', col='transparent', lwd=2,add=T)
lines(PA_inter1,col='red',lwd=4); box()### Save new line as shapefile
sf::st_write(obj = sPath_sf, dsn = paste0(output_wd,'/costpath_line.shp'), delete_layer = TRUE)## Deleting layer `costpath_line' using driver `ESRI Shapefile'
## Writing layer `costpath_line' to data source
## `C:/Users/wenzler/Documents/GitHub/d6_teaching_collection/output/costpath_line.shp' using driver `ESRI Shapefile'
## Writing 1 features with 3 fields and geometry type Line String.